}
if ( xc_ia64_get_pfn_list(xc_handle, domid,
- page_array, IO_PAGE_START >> PAGE_SHIFT, 1) != 1 )
+ page_array,
+ ram_pages + (GFW_SIZE >> PAGE_SHIFT), 1) != 1 )
{
fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
exit(-1);
goto error_out;
}
if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array,
- dst_pfn>>PAGE_SHIFT, nr_pages) != nr_pages ){
+ dst_pfn, nr_pages) != nr_pages ){
PERROR("Could not get the page frame list");
goto error_out;
}
#define HOB_SIGNATURE 0x3436474953424f48 // "HOBSIG64"
-#define GFW_HOB_START ((4UL<<30)-(14UL<<20)) //4G -14M
-#define GFW_HOB_SIZE (1UL<<20) //1M
-#define MEM_G (1UL << 30)
-#define MEM_M (1UL << 20)
+#define GFW_HOB_START ((4UL<<30)-(14UL<<20)) //4G -14M
+#define GFW_HOB_SIZE (1UL<<20) //1M
+#define RAW_GFW_START_NR(s) ((s) >> PAGE_SHIFT)
+#define RAW_GFW_HOB_START_NR(s) \
+ (RAW_GFW_START_NR(s) + ((GFW_HOB_START - GFW_START) >> PAGE_SHIFT))
+#define RAW_GFW_IMAGE_START_NR(s,i) \
+ (RAW_GFW_START_NR(s) + (((GFW_SIZE - (i))) >> PAGE_SHIFT))
+#define RAW_IO_PAGE_START_NR(s) \
+ (RAW_GFW_START_NR(s) + (GFW_SIZE >> PAGE_SHIFT))
+#define RAW_STORE_PAGE_START_NR(s) \
+ (RAW_IO_PAGE_START_NR(s) + (IO_PAGE_SIZE >> PAGE_SHIFT))
typedef struct {
unsigned long signature;
static int add_mem_hob(void* hob_buf, unsigned long dom_mem_size);
static int build_hob (void* hob_buf, unsigned long hob_buf_size,
unsigned long dom_mem_size);
-static int load_hob(int xc_handle,uint32_t dom, void *hob_buf);
+static int load_hob(int xc_handle,uint32_t dom, void *hob_buf,
+ unsigned long dom_mem_size);
int xc_ia64_build_hob(int xc_handle, uint32_t dom, unsigned long memsize){
return -1;
}
- if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize<<20) < 0){
+ if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize) < 0){
free (hob_buf);
PERROR("Could not build hob");
return -1;
}
- if ( load_hob( xc_handle, dom, hob_buf) <0){
+ if ( load_hob( xc_handle, dom, hob_buf, memsize) < 0){
free (hob_buf);
PERROR("Could not load hob");
return -1;
}
static int
-load_hob(int xc_handle, uint32_t dom, void *hob_buf)
+load_hob(int xc_handle, uint32_t dom, void *hob_buf,
+ unsigned long dom_mem_size)
{
// hob_buf should be page aligned
int hob_size;
nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
return xc_ia64_copy_to_domain_pages(xc_handle, dom,
- hob_buf, GFW_HOB_START, nr_pages );
+ hob_buf, RAW_GFW_HOB_START_NR(dom_mem_size), nr_pages );
}
#define MIN(x, y) ((x) < (y)) ? (x) : (y)
unsigned long page_array[2];
shared_iopage_t *sp;
int i;
+ unsigned long dom_memsize = (memsize << 20);
- // FIXME: initialize pfn list for a temp hack
- if (xc_ia64_get_pfn_list(xc_handle, dom, NULL, -1, -1) == -1) {
- PERROR("Could not allocate continuous memory");
- goto error_out;
- }
-
if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
PERROR("Guest firmware size is incorrect [%ld]?", image_size);
return -1;
/* Load guest firmware */
if( xc_ia64_copy_to_domain_pages( xc_handle, dom,
- image, 4*MEM_G-image_size, image_size>>PAGE_SHIFT)) {
+ image, RAW_GFW_IMAGE_START_NR(dom_memsize, image_size),
+ image_size>>PAGE_SHIFT)) {
PERROR("Could not load guest firmware into domain");
goto error_out;
}
/* Hand-off state passed to guest firmware */
- if (xc_ia64_build_hob(xc_handle, dom, memsize) < 0){
+ if (xc_ia64_build_hob(xc_handle, dom, dom_memsize) < 0){
PERROR("Could not build hob\n");
goto error_out;
}
/* Retrieve special pages like io, xenstore, etc. */
- if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, IO_PAGE_START>>PAGE_SHIFT, 2) != 2 )
+ if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
+ RAW_IO_PAGE_START_NR(dom_memsize), 2) != 2 )
{
PERROR("Could not get the page frame list");
goto error_out;
#define VMX_SYS_PAGES (2 + (GFW_SIZE >> PAGE_SHIFT))
#define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
-int vmx_alloc_contig_pages(struct domain *d)
+int vmx_build_physmap_table(struct domain *d)
{
- unsigned long i, j, start,tmp, end, pgnr, conf_nr;
- struct page_info *page;
+ unsigned long i, j, start, tmp, end, mfn;
struct vcpu *v = d->vcpu[0];
+ struct list_head *list_ent = d->page_list.next;
+ ASSERT(!d->arch.physmap_built);
ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
+ ASSERT(d->max_pages == d->tot_pages);
/* Mark I/O ranges */
for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
assign_domain_page(d, j, io_ranges[i].type);
}
- conf_nr = VMX_CONFIG_PAGES(d);
- if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1)))
- panic("vti domain needs 128M memory at least\n");
-/*
- order = get_order_from_pages(conf_nr);
- if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
- printk("Could not allocate order=%d pages for vmx contig alloc\n",
- order);
- return -1;
+ /* Map normal memory below 3G */
+ end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
+ tmp = end < MMIO_START ? end : MMIO_START;
+ for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ assign_domain_page(d, i, mfn << PAGE_SHIFT);
+ list_ent = mfn_to_page(mfn)->list.next;
}
-*/
-
-/* reserve contiguous 64M for linux kernel */
-
- if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){
- assign_domain_page(d, i, pgnr << PAGE_SHIFT);
- }
-
- for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){
- if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- assign_domain_page(d, i, pgnr << PAGE_SHIFT);
- }
+ ASSERT(list_ent != &d->page_list);
- /* Map normal memory below 3G */
- end = conf_nr << PAGE_SHIFT;
- tmp = end < MMIO_START ? end : MMIO_START;
- for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){
- if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- assign_domain_page(d, i, pgnr << PAGE_SHIFT);
- }
/* Map normal memory beyond 4G */
if (unlikely(end > MMIO_START)) {
start = 4 * MEM_G;
end = start + (end - 3 * MEM_G);
- for (i = start; i < end; i += PAGE_SIZE){
- if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+ for (i = start; (i < end) &&
+ (list_ent != &d->page_list); i += PAGE_SIZE) {
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ assign_domain_page(d, i, mfn << PAGE_SHIFT);
+ list_ent = mfn_to_page(mfn)->list.next;
+ }
+ ASSERT(list_ent != &d->page_list);
}
- }
-
- d->arch.max_pfn = end >> PAGE_SHIFT;
-/*
- order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
- if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
- printk("Could not allocate order=%d pages for vmx contig alloc\n",
- order);`
- return -1;
- }
-*/
+
/* Map guest firmware */
- for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++){
- if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- assign_domain_page(d, i, pgnr << PAGE_SHIFT);
- }
-
-/*
- if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
- printk("Could not allocate order=1 pages for vmx contig alloc\n");
- return -1;
+ for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
+ (list_ent != &d->page_list); i += PAGE_SIZE) {
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ assign_domain_page(d, i, mfn << PAGE_SHIFT);
+ list_ent = mfn_to_page(mfn)->list.next;
}
-*/
+ ASSERT(list_ent != &d->page_list);
+
/* Map for shared I/O page and xenstore */
- if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
-
- if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
- printk("No enough memory for vti domain!!!\n");
- return -1;
- }
- pgnr = page_to_mfn(page);
- assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
+ mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+ assign_domain_page(d, IO_PAGE_START, mfn << PAGE_SHIFT);
+ list_ent = mfn_to_page(mfn)->list.next;
+ ASSERT(list_ent != &d->page_list);
+ mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+ assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
+ list_ent = mfn_to_page(mfn)->list.next;
+ ASSERT(list_ent == &d->page_list);
+
+ d->arch.max_pfn = end >> PAGE_SHIFT;
+ d->arch.physmap_built = 1;
set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
return 0;
}
void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
{
ASSERT(d != dom0); /* only for non-privileged vti domain */
+
+ if (!d->arch.physmap_built)
+ vmx_build_physmap_table(d);
+
d->arch.vmx_platform.shared_page_va =
(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
/* TEMP */
*/
case DOM0_GETMEMLIST:
{
- unsigned long i;
+ unsigned long i = 0;
struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
unsigned long mfn;
+ struct list_head *list_ent;
ret = -EINVAL;
if ( d != NULL )
{
ret = 0;
- /* A temp trick here. When max_pfns == -1, we assume
- * the request is for machine contiguous pages, so request
- * all pages at first query
- */
- if ( (op->u.getmemlist.max_pfns == -1UL) &&
- !test_bit(ARCH_VMX_CONTIG_MEM,
- &d->vcpu[0]->arch.arch_vmx.flags) ) {
- ret = (long) vmx_alloc_contig_pages(d);
- put_domain(d);
- return ret ? (-ENOMEM) : 0;
+ list_ent = d->page_list.next;
+ while ( (i != start_page) && (list_ent != &d->page_list)) {
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ i++;
+ list_ent = mfn_to_page(mfn)->list.next;
}
- for ( i = start_page; i < (start_page + nr_pages); i++ )
+ if (i == start_page)
{
- mfn = gmfn_to_mfn_foreign(d, i);
+ while((i < (start_page + nr_pages)) &&
+ (list_ent != &d->page_list))
+ {
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
- if ( copy_to_guest_offset(op->u.getmemlist.buffer,
+ if ( copy_to_guest_offset(op->u.getmemlist.buffer,
i - start_page, &mfn, 1) )
- {
- ret = -EFAULT;
- break;
+ {
+ ret = -EFAULT;
+ break;
+ }
+ i++;
+ list_ent = mfn_to_page(mfn)->list.next;
}
- }
+ } else
+ ret = -ENOMEM;
op->u.getmemlist.num_pfns = i - start_page;
copy_to_guest(u_dom0_op, op, 1);
extern void serial_input_init(void);
static void init_switch_stack(struct vcpu *v);
+void build_physmap_table(struct domain *d);
/* this belongs in include/asm, but there doesn't seem to be a suitable place */
void arch_domain_destroy(struct domain *d)
memset(d->arch.mm, 0, sizeof(*d->arch.mm));
INIT_LIST_HEAD(&d->arch.mm->pt_list);
+ d->arch.physmap_built = 0;
if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
goto fail_nomem;
vmx_setup_platform(d, c);
vmx_final_setup_guest(v);
- }
+ } else if (!d->arch.physmap_built)
+ build_physmap_table(d);
*regs = c->regs;
if (v == d->vcpu[0]) {
*(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
}
}
-#if 0
-/* map a physical address with specified I/O flag */
-void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
-{
- struct mm_struct *mm = d->arch.mm;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t io_pte;
- if (!mm->pgd) {
- printk("assign_domain_page: domain pgd must exist!\n");
- return;
- }
- ASSERT(flags & GPFN_IO_MASK);
-
- pgd = pgd_offset(mm,mpaddr);
- if (pgd_none(*pgd))
- pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+void build_physmap_table(struct domain *d)
+{
+ struct list_head *list_ent = d->page_list.next;
+ unsigned long mfn, i = 0;
- pud = pud_offset(pgd, mpaddr);
- if (pud_none(*pud))
- pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+ ASSERT(!d->arch.physmap_built);
+ while(list_ent != &d->page_list) {
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ assign_domain_page(d, i << PAGE_SHIFT, mfn << PAGE_SHIFT);
- pmd = pmd_offset(pud, mpaddr);
- if (pmd_none(*pmd))
- pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
-// pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
-
- pte = pte_offset_map(pmd, mpaddr);
- if (pte_none(*pte)) {
- pte_val(io_pte) = flags;
- set_pte(pte, io_pte);
+ i++;
+ list_ent = mfn_to_page(mfn)->list.next;
}
- else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+ d->arch.physmap_built = 1;
}
-#endif
+
void mpafoo(unsigned long mpaddr)
{
extern unsigned long privop_trace;
return *(unsigned long *)pte;
}
#endif
-tryagain:
if (pgd_present(*pgd)) {
pud = pud_offset(pgd,mpaddr);
if (pud_present(*pud)) {
}
}
}
- /* if lookup fails and mpaddr is "legal", "create" the page */
if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
- if (assign_new_domain_page(d,mpaddr)) goto tryagain;
- }
- printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
- mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+ printk("lookup_domain_mpa: non-allocated mpa 0x%lx (< 0x%lx)\n",
+ mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+ } else
+ printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
+ mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
mpafoo(mpaddr);
return 0;
}
break;
case __HYPERVISOR_memory_op:
- /* we don't handle reservations; just return success */
- {
- struct xen_memory_reservation reservation;
- void *arg = (void *) regs->r15;
-
- switch(regs->r14) {
- case XENMEM_increase_reservation:
- case XENMEM_decrease_reservation:
- if (copy_from_user(&reservation, arg,
- sizeof(reservation)))
- regs->r8 = -EFAULT;
- else
- regs->r8 = reservation.nr_extents;
- break;
- default:
- regs->r8 = do_memory_op((int) regs->r14, guest_handle_from_ptr(regs->r15, void));
- break;
- }
- }
+ regs->r8 = do_memory_op(regs->r14,
+ guest_handle_from_ptr(regs->r15, void));
break;
case __HYPERVISOR_event_channel_op:
break;
case __HYPERVISOR_grant_table_op:
- regs->r8 = do_grant_table_op((unsigned int) regs->r14, guest_handle_from_ptr(regs->r15, void), (unsigned int) regs->r16);
+ regs->r8 = do_grant_table_op((unsigned int) regs->r14,
+ guest_handle_from_ptr(regs->r15, void),
+ (unsigned int) regs->r16);
break;
case __HYPERVISOR_console_io:
- regs->r8 = do_console_io((int) regs->r14, (int) regs->r15, guest_handle_from_ptr(regs->r16, char));
+ regs->r8 = do_console_io((int) regs->r14, (int) regs->r15,
+ guest_handle_from_ptr(regs->r16, char));
break;
case __HYPERVISOR_xen_version:
- regs->r8 = do_xen_version((int) regs->r14, guest_handle_from_ptr(regs->r15, void));
+ regs->r8 = do_xen_version((int) regs->r14,
+ guest_handle_from_ptr(regs->r15, void));
break;
case __HYPERVISOR_multicall:
- regs->r8 = do_multicall(guest_handle_from_ptr(regs->r14, multicall_entry_t), (unsigned int) regs->r15);
+ regs->r8 = do_multicall(guest_handle_from_ptr(regs->r14,
+ multicall_entry_t), (unsigned int) regs->r15);
break;
default:
int rid_bits; /* number of virtual rid bits (default: 18) */
int breakimm;
+ int physmap_built; /* Whether is physmap built or not */
int imp_va_msb;
/* System pages out of guest memory, like for xenstore/console */
unsigned long sys_pgnr;
extern void vmx_save_state(struct vcpu *v);
extern void vmx_load_state(struct vcpu *v);
extern void show_registers(struct pt_regs *regs);
-extern int vmx_alloc_contig_pages(struct domain *d);
+extern int vmx_build_physmap_table(struct domain *d);
extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern void sync_split_caches(void);
extern void vmx_virq_line_assist(struct vcpu *v);